The main module for accessing RegReg objects is `regreg.api`.


In [1]:
import numpy as np
import regreg.api as rr
from IPython.display import Math

We will compare our fitted values to R's output from the `lars` package.


In [2]:
# Load the R magic extension so %%R cells can run R code and exchange
# variables with Python via -i/-o flags.
# NOTE(review): `rmagic` is the legacy extension bundled with old IPython;
# modern environments use `%load_ext rpy2.ipython` instead — confirm setup.
%load_ext rmagic


The rmagic extension is already loaded. To reload it, use:
  %reload_ext rmagic

In [3]:
%%R -o X,Y
# Load the diabetes data shipped with the `lars` package and export the
# design matrix X and response Y back to the Python namespace (-o flag).
library(lars)
data(diabetes)
X = diabetes$x
Y = diabetes$y


Loaded lars 1.1

For a given design matrix $X$ and response $Y$, we construct the squared-error loss:


In [15]:
# Construct the squared-error loss C * ||X beta - Y||_2^2 on the diabetes
# data and render its LaTeX form (the cell's last expression is displayed).
loss = rr.squared_error(X, Y)
Math(loss.latexify())


Out[15]:
$$C\left\|X_{}\beta - Y_{}\right\|^2_2$$

In [16]:
from IPython.display import display

# l1-norm penalty in Lagrange form, lambda * ||beta||_1 with lambda = 200,
# on a coefficient vector of length 10 (the diabetes data has 10 predictors).
penalty_lagrange = rr.l1norm(10, lagrange=200)
# Bug fix: a bare Math(...) in mid-cell is evaluated and discarded — only a
# cell's *last* expression renders. Wrap it in display() so the penalty's
# LaTeX actually appears before the solver output.
display(Math(penalty_lagrange.latexify()))
# Pose and solve the Lagrange-form lasso to a tight tolerance.
problem_lagrange = rr.simple_problem(loss, penalty_lagrange)
coef_lagrange = problem_lagrange.solve(tol=1.e-12)
print(coef_lagrange)


[  -0.            0.         -479.01880416 -149.17252025    0.            0.
   71.22656102   -0.         -415.33448968   -0.        ]

In [17]:
# Render the full Lagrange-form problem (loss f plus penalty g) in LaTeX.
Math(problem_lagrange.latexify())


Out[17]:
$$ \begin{aligned} \text{minimize}_{\beta} & f(\beta) + g(\beta) \\ f(\beta) &= C\left\|X_{1}\beta - Y_{1}\right\|^2_2 \\ g(\beta) &= \lambda_{2} \|\beta\|_1 \\ \end{aligned} $$

In [18]:
# The l1 norm of the Lagrange-form solution; using it as the bound delta
# should yield an equivalent constrained ("bound-form") problem below.
implied_bound = np.linalg.norm(coef_lagrange, ord=1)
print(implied_bound)


1114.75237512

In [19]:
# Re-express the lasso in bound form: minimize the loss subject to
# ||beta||_1 <= delta, with delta taken from the Lagrange solution's l1 norm.
bound_constraint = rr.l1norm(10, bound=implied_bound)
problem_bound = rr.simple_problem(loss, bound_constraint)
Math(problem_bound.latexify())


Out[19]:
$$ \begin{aligned} \text{minimize}_{\beta} & f(\beta) + g(\beta) \\ f(\beta) &= C\left\|X_{1}\beta - Y_{1}\right\|^2_2 \\ g(\beta) &= I^{\infty}(\|\beta\|_1 \leq \delta_{2}) \\ \end{aligned} $$

In [12]:
# Solve the bound-form problem; the coefficients should agree with the
# Lagrange-form solution up to solver tolerance.
coef_bound = problem_bound.solve(tol=1.e-12)
print(coef_bound)


[   0.           -0.         -479.01914146 -149.17251245   -0.           -0.
   71.22616687    0.         -415.33455434    0.        ]

In [13]:
# Relative difference between the two solutions — should be near zero.
np.linalg.norm(coef_bound - coef_lagrange) / np.linalg.norm(coef_lagrange)


Out[13]:
7.9799131568331436e-07

In [14]:
# Render the bound-form problem in LaTeX.
Math(problem_bound.latexify())


Out[14]:
$$ \begin{aligned} \text{minimize}_{\beta} & f(\beta) + g(\beta) \\ f(\beta) &= C\left\|X_{1}\beta - Y_{1}\right\|^2_2 \\ g(\beta) &= I^{\infty}(\|\beta\|_1 \leq \delta_{2}) \\ \end{aligned} $$

Comparison to LARS


In [9]:
%%R -o L,B
# Fit the full lasso path with LARS — no normalization and no intercept,
# to match the regreg setup — and export the breakpoint values L and the
# coefficient matrix B back to Python.
lars_fit = lars(X, Y, type='lasso', normalize=FALSE, intercept=FALSE)
L = lars_fit$lambda
B = lars_fit$beta

In [10]:
# Re-solve the regreg problem at each LARS breakpoint by updating the
# penalty's lagrange parameter in place. The final entry (lambda = 0) is
# the unpenalized least-squares fit, computed via the pseudo-inverse.
solns = []
for lagrange in L:
    penalty_lagrange.lagrange = lagrange
    soln = problem_lagrange.solve(tol=1.e-12)
    solns.append(soln.copy())
solns.append(np.dot(np.linalg.pinv(X), Y))
solns = np.array(solns)

In [11]:
# Relative error between the LARS coefficients and the regreg solutions at
# each breakpoint; the max(..., 1) guard avoids dividing by a near-zero norm.
# Fixed: this was a Python 2 `print` statement — use the print() function,
# consistent with the earlier cells and valid on both Python 2 and 3.
print([np.linalg.norm(B[i] - solns[i]) / max(np.linalg.norm(B[i]), np.linalg.norm(solns[i]), 1)
       for i in range(solns.shape[0])])


[0.0, 3.7608696193502326e-05, 7.2955089079699002e-06, 5.4337965592815922e-06, 2.7212899296094342e-06, 3.8257292740352516e-06, 1.6269402371375937e-05, 7.5366116712411135e-06, 0.00016712306582799966, 1.5524617950935121e-05, 6.0066322936878123e-06, 2.4327093955342122e-05, 3.0114314630637587e-14]

Path of solutions


In [12]:
%%R -o B2
# Compare against glmnet at the same lambda values (glmnet scales the
# penalty by the sample size, hence L/nrow(X)).
# NOTE(review): this cell errors in the captured output below — the
# installed glmnet (1.7.4) rejects `intercept=FALSE` as an unused argument;
# that argument was added in a later glmnet release. Upgrade glmnet or drop
# the argument to reproduce this comparison.
library(glmnet)
A=glmnet(X,Y, lambda=L/nrow(X),standardize=FALSE, intercept=FALSE)
B2 = as.matrix(A$beta)
print(L)
print(A$beta-B[1:12])


---------------------------------------------------------------------------
RInterpreterError                         Traceback (most recent call last)
<ipython-input-12-86117aec0e56> in <module>()
----> 1 get_ipython().run_cell_magic(u'R', u'-o B2', u'library(glmnet)\nA=glmnet(X,Y, lambda=L/nrow(X),standardize=FALSE, intercept=FALSE)\nB2 = as.matrix(A$beta)\nprint(L)\nprint(A$beta-B[1:12])')

/Users/jonathantaylor/ipython/IPython/core/interactiveshell.pyc in run_cell_magic(self, magic_name, line, cell)
   2134             magic_arg_s = self.var_expand(line, stack_depth)
   2135             with self.builtin_trap:
-> 2136                 result = fn(magic_arg_s, cell)
   2137             return result
   2138 

/Users/jonathantaylor/ipython/IPython/extensions/rmagic.pyc in R(self, line, cell, local_ns)

/Users/jonathantaylor/ipython/IPython/core/magic.pyc in <lambda>(f, *a, **k)
    189     # but it's overkill for just that one bit of state.
    190     def magic_deco(arg):
--> 191         call = lambda f, *a, **k: f(*a, **k)
    192 
    193         if callable(arg):

/Users/jonathantaylor/ipython/IPython/extensions/rmagic.pyc in R(self, line, cell, local_ns)
    587                 return_output = False
    588         else:
--> 589             text_result, result = self.eval(code)
    590             text_output += text_result
    591 

/Users/jonathantaylor/ipython/IPython/extensions/rmagic.pyc in eval(self, line)
    196         except (ri.RRuntimeError, ValueError) as exception:
    197             warning_or_other_msg = self.flush() # otherwise next return seems to have copy of error
--> 198             raise RInterpreterError(line, str_to_unicode(str(exception)), warning_or_other_msg)
    199         text_output = self.flush()
    200         ri.set_writeconsole(old_writeconsole)

RInterpreterError: Failed to parse and evaluate line u'library(glmnet)\nA=glmnet(X,Y, lambda=L/nrow(X),standardize=FALSE, intercept=FALSE)\nB2 = as.matrix(A$beta)\nprint(L)\nprint(A$beta-B[1:12])'.
R error message: u'Error in glmnet(X, Y, lambda = L/nrow(X), standardize = FALSE, intercept = FALSE) : \n  unused argument(s) (intercept = FALSE)'
R stdout:
Loading required package: Matrix
Loading required package: lattice
Loaded glmnet 1.7.4

Error in glmnet(X, Y, lambda = L/nrow(X), standardize = FALSE, intercept = FALSE) : 
  unused argument(s) (intercept = FALSE)

In [ ]:
# Compare the final glmnet, LARS, and regreg coefficient vectors.
# NOTE(review): B2 is never created because the glmnet cell above errors
# out, so this cell fails on a fresh run until that cell is fixed.
# Fixed: converted Python 2 `print` statements to the print() function,
# consistent with the rest of the notebook.
print(B2[-1])
print(B[-1])
print(solns[-2])

In [ ]:
# Display the full glmnet coefficient matrix (requires the glmnet cell
# above to have run successfully).
# Fixed: Python 2 `print` statement converted to the print() function.
print(B2)

In [ ]:
# NOTE(review): this cell looks like leftover scratch work and will fail on
# a fresh run: `soln` above is a plain ndarray (a single solve result), not
# a dict with 'beta'/'lagrange' keys, and `plt` is never imported in this
# notebook. Presumably it was written against an older path-solver API that
# returned a dict of the whole path — confirm the intended API or delete.
beta = np.array(soln['beta'].todense())
soln['lagrange'].shape
beta[1].shape
f1, ax1 = plt.subplots()
# Coefficient profiles versus the lagrange (lambda) values.
[ax1.plot(soln['lagrange'],beta[i]) for i in range(1,11)]
l1norm = np.fabs(beta[1:]).sum(0)
f2, ax2= plt.subplots()
# Coefficient profiles versus the l1 norm of the solution (classic lasso path plot).
[ax2.plot(l1norm,beta[i]) for i in range(1,11)];

In [ ]:
%%R
# Plot the LARS coefficient path using the package's built-in plot method.
l = lars(X, Y)
plot(l)

# NOTE(review): dead commented-out experiment — remove once glmnet
# comparison above is resolved.
#library(glmnet)
#g = glmnet(X, Y)

In [ ]: